%reload_ext autoreload
%autoreload 2
# https://opencv-python-tutroals.readthedocs.io/en/latest/py_tutorials/py_calib3d/py_calibration/py_calibration.html
# https://github.com/opencv/opencv/blob/master/samples/python/calibrate.py
import cv2, os, numpy as np, glob, matplotlib.pyplot as plt, tqdm
def drawChessboardCorners(image, pattern_size, scale_corners, ret, is_show=True):
    """Draw detected chessboard corners on a copy of `image`.

    Each pattern row gets its own color and consecutive corners are
    connected by lines, so the detection order is visible.

    Parameters
    ----------
    image : np.ndarray
        BGR image the corners were detected in (not modified).
    pattern_size : tuple[int, int]
        (rows, cols) of inner corners.
    scale_corners : np.ndarray
        Corner coordinates, squeezable to shape (rows*cols, 2).
    ret : bool
        Detection flag from cv2.findChessboardCorners; when False the
        original image is returned unchanged.
    is_show : bool
        When True, also display the annotated image with matplotlib.

    Returns
    -------
    np.ndarray
        Annotated copy of `image` (or `image` itself when ret is False).
    """
    if not ret:
        return image
    scale_corners = np.squeeze(scale_corners)
    colors = [(255, 0, 0), (0, 255, 0), (0, 0, 255), (255, 255, 0),
              (255, 0, 255), (255, 128, 0), (255, 0, 128), (128, 255, 0)]
    circle_size = 25
    line_size = 25
    new_image = image.copy()
    rows, cols = pattern_size[0], pattern_size[1]
    centerp = center = None
    for r in range(rows):
        for c in range(cols):
            centerp = center
            # np.int was removed in NumPy 1.24 -- use the builtin int.
            center = tuple(scale_corners[r * cols + c, :].astype(int))
            if centerp is not None:
                # Wrap the color index so patterns with more than
                # len(colors) rows no longer raise IndexError.
                cv2.line(new_image, center, centerp, colors[r % len(colors)], line_size)
            new_image = cv2.circle(new_image, center, circle_size,
                                   colors[r % len(colors)], -1)
    if is_show:
        plt.figure(figsize=(12, 12))
        plt.imshow(new_image[..., ::-1])  # BGR -> RGB for matplotlib
        plt.axis("off")
    return new_image
# drawChessboardCorners
# --- Calibration setup -------------------------------------------------
search_pattern = "./data/calibration/Note8a/*.jpg"
# Termination criteria for cornerSubPix refinement: stop after 30
# iterations or when the corner moves by less than 0.001 px.
criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001)
pattern_size = (7, 7)  # inner corners per chessboard row/column
scale = 4              # downscale factor used for the initial detection
square_size = 1.0      # chessboard square edge length (arbitrary units)
# Prepare object points on the z=0 plane, like (0,0,0), (1,0,0), (2,0,0)
# ..., (6,6,0), scaled by the physical square size.
objp = np.zeros((np.prod(pattern_size), 3), np.float32)
objp[:, :2] = np.indices(pattern_size).T.reshape(-1, 2)
objp *= square_size
path_images = glob.glob(search_pattern)
path_images.sort()
# Accumulators filled by the detection loop below.
# (The unused duplicate lists `objpoints`/`imgpoints` were removed --
# only `obj_points`/`img_points` are ever read.)
obj_points = []  # 3d points in real world space (one objp copy per image)
img_points = []  # 2d points in the image plane (refined corners)
images = []      # original images with a successful detection
# Grid layout for the detection preview plots.
cols = 2
rows = (len(path_images) + cols - 1) // cols
idx_image = 0
# Detect and refine chessboard corners in every image, plotting the
# results in a rows x cols grid of figures.
for r in range(rows):
    plt.figure(figsize=(20, 7))
    for c in range(cols):
        idx_pos = r * cols + c
        if idx_pos >= len(path_images):
            continue
        fname = path_images[idx_pos]
        org_image = cv2.imread(fname)
        org_gray = cv2.cvtColor(org_image, cv2.COLOR_BGR2GRAY)
        org_height, org_width = org_image.shape[:2]
        new_height, new_width = org_height // scale, org_width // scale
        # BUG FIX: the third positional argument of cv2.resize is `dst`,
        # not the interpolation flag -- it must be passed by keyword.
        scale_image = cv2.resize(org_image, (new_width, new_height),
                                 interpolation=cv2.INTER_CUBIC)
        gray = cv2.cvtColor(scale_image, cv2.COLOR_BGR2GRAY)
        # Find the chessboard corners on the downscaled image (faster).
        ret, corners = cv2.findChessboardCorners(gray, pattern_size, None)
        # If found, add object points and image points (after refinement).
        if ret:
            corners = np.squeeze(corners)
            # Refine on the small image first, then once more at full
            # resolution after scaling the coordinates back up.
            corners2 = cv2.cornerSubPix(
                gray, corners,
                (pattern_size[0] // 2, pattern_size[1] // 2), (-1, -1), criteria)
            scale_corners = cv2.cornerSubPix(
                org_gray, corners2 * scale,
                (pattern_size[0] // 2, pattern_size[1] // 2), (-1, -1), criteria)
            # Draw and display the corners.
            ret_image = drawChessboardCorners(org_image, pattern_size,
                                              scale_corners, ret, is_show=False)
            images.append(org_image)
            obj_points.append(objp)
            img_points.append(scale_corners)
            plt.subplot(1, cols, c + 1), plt.imshow(ret_image[..., ::-1])
            plt.title(f"Image {idx_image}")
            plt.axis("off")
            idx_image = idx_image + 1
    plt.show()
# Hand-picked bad detections (kept out of the *_filter arrays below).
idx_filter = [idx for idx in range(len(img_points)) if idx not in [15, 16, 18, 19]]
img_points = np.squeeze(np.array(img_points))
obj_points = np.array(obj_points)
img_points_filter = img_points[idx_filter]
obj_points_filter = obj_points[idx_filter]
# Calibrate on the full, unfiltered detection set.
rms, camera_matrix, dist_coefs, _rvecs, _tvecs = cv2.calibrateCamera(
    obj_points, img_points, (org_width, org_height), None, None)
print("\nRMS:", rms)
print("camera matrix:\n", camera_matrix)
print("distortion coefficients: ", dist_coefs.ravel())
obj_points_test = obj_points  # or obj_points_filter
img_points_test = img_points  # or img_points_filter
# Mean reprojection error: re-project the object points with the fitted
# extrinsics/intrinsics and compare against the detected corners.
mean_error = 0
for idx, obj_pts in enumerate(obj_points_test):
    projected, _ = cv2.projectPoints(obj_pts, _rvecs[idx], _tvecs[idx],
                                     camera_matrix, dist_coefs)
    projected = np.squeeze(projected)
    mean_error += cv2.norm(img_points_test[idx], projected, cv2.NORM_L2) / len(projected)
print("total error: ", mean_error / len(obj_points_test))
# Persist the unfiltered calibration result.
np.savez("calibrate0.npz",
         camera_matrix=camera_matrix,
         dist_coefs=dist_coefs,
         mean_error=mean_error)
# Re-run the calibration on the filtered detections only.
rms, camera_matrix, dist_coefs, _rvecs, _tvecs = cv2.calibrateCamera(
    obj_points_filter, img_points_filter, (org_width, org_height), None, None)
print("\nRMS:", rms)
print("camera matrix:\n", camera_matrix)
print("distortion coefficients: ", dist_coefs.ravel())
obj_points_test = obj_points_filter
img_points_test = img_points_filter
# Mean reprojection error over the filtered set (same metric as above).
mean_error = 0
for idx, obj_pts in enumerate(obj_points_test):
    projected, _ = cv2.projectPoints(obj_pts, _rvecs[idx], _tvecs[idx],
                                     camera_matrix, dist_coefs)
    projected = np.squeeze(projected)
    mean_error += cv2.norm(img_points_test[idx], projected, cv2.NORM_L2) / len(projected)
print("total error: ", mean_error / len(obj_points_test))
# Persist the filtered calibration result.
np.savez("calibrate1.npz",
         camera_matrix=camera_matrix,
         dist_coefs=dist_coefs,
         mean_error=mean_error)
# Show the undistorted (and cropped) version of every calibrated image
# in a rows x cols grid of figures.
cols = 2
rows = (len(images) + cols - 1) // cols
for r in range(rows):
    plt.figure(figsize=(20, 7))
    for c in range(cols):
        idx_pos = r * cols + c
        if idx_pos >= len(images):
            continue
        img = images[idx_pos]
        h, w = img.shape[:2]
        # alpha=1 keeps all source pixels; roi marks the valid region.
        new_camera_matrix, roi = cv2.getOptimalNewCameraMatrix(
            camera_matrix, dist_coefs, (w, h), 1, (w, h))
        dst = cv2.undistort(img, camera_matrix, dist_coefs, None, new_camera_matrix)
        # Crop away the invalid border introduced by undistortion.
        x, y, w, h = roi
        dst = dst[y:y + h, x:x + w]
        plt.subplot(1, cols, c + 1)
        plt.title(f"Image {idx_pos}")
        plt.imshow(dst[..., ::-1])
        plt.axis("off")
    plt.show()